This project builds on concepts from the previous Lane Line Finding project (Project 1). It uses more advanced techniques to fine-tune the line-detection algorithms, producing a clearer and richer visual result that finds lane lines as accurately as possible with the tools we have learned thus far in the nanodegree program.
Below we will build a pipeline step by step via the following process:
# Imports of the main functions and libraries we will be using
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import pickle
import scipy
%matplotlib inline
#from tracker import tracker - only needed if this is in separate .py file
# General function to display two images side by side
def print_images(image1, image1_title, image2, image2_title):
    """Display two images side by side with large titles.

    Three-channel images are assumed to be in BGR channel order (cv2
    convention) and are channel-swapped to RGB before display so colors
    render correctly; single-channel images are shown with the gray
    colormap.

    Parameters
    ----------
    image1, image2 : ndarray
        Images to display (grayscale or 3-channel).
    image1_title, image2_title : str
        Titles rendered above each image.
    """
    def correct_img(image):
        # Only color images need the channel swap; grayscale passes through.
        if len(image.shape) > 2:
            c0, c1, c2 = cv2.split(image)      # split out the channels
            image = cv2.merge((c2, c1, c0))    # recombine in reverse order
        return image

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(correct_img(image1), cmap='gray')
    ax1.set_title(image1_title, fontsize=40)
    ax2.imshow(correct_img(image2), cmap='gray')
    ax2.set_title(image2_title, fontsize=40)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
Step 1: We first need to calculate the correct camera matrix and distortion coefficients using the calibration chessboard images provided in the repository. Once we find these coefficients, we can use them in the next section to undistort images suffering from radial and tangential distortion.
#Function to calibrate a camera to obtain distortion coefficients from finding checkerboard corners
def camera_calibration(uncal_img):
    """Calibrate the camera from a single chessboard image.

    Detects the 9x6 inner-corner grid, saves a corner-annotated copy to
    data/output_images/corners_found.jpg, runs cv2.calibrateCamera, and
    pickles the camera matrix (`mtx`) and distortion coefficients
    (`dist`) to data/camera_cal/calibration_pickle.p for later reuse.

    Parameters
    ----------
    uncal_img : ndarray
        Chessboard image in RGB order (as loaded by mpimg.imread).

    NOTE(review): calibrating from a single image is approximate;
    accumulating object/image points across all calibration images
    before calling calibrateCamera would be more robust — confirm
    against project requirements.
    """
    img_size = (uncal_img.shape[1], uncal_img.shape[0])  # (width, height)

    # Map image points (2D) to object points (3D, real-world) pairs.
    objpoints = []  # 3D points in real-world space
    imgpoints = []  # 2D points in the image plane

    nx = 9  # number of inside corners in x
    ny = 6  # number of inside corners in y

    # Object points form a regular grid on the z=0 plane:
    # (0,0,0), (1,0,0), ..., (nx-1, ny-1, 0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    # findChessboardCorners requires a grayscale image.
    gray = cv2.cvtColor(uncal_img, cv2.COLOR_RGB2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if not ret:
        # Bail out explicitly: calling cv2.calibrateCamera with empty
        # point lists raises an OpenCV error.
        print('Chessboard corners not found; skipping calibration for this image.')
        return

    imgpoints.append(corners)  # detected 2D corners
    objpoints.append(objp)     # corresponding ideal 3D grid

    # Save a visual check of the detected corners. Draw on a copy so the
    # caller's image is not mutated; the input is RGB (mpimg.imread) but
    # cv2.imwrite expects BGR, so convert before writing.
    annotated = uncal_img.copy()
    cv2.drawChessboardCorners(annotated, (nx, ny), corners, ret)
    write_name = 'data/output_images/corners_found.jpg'
    cv2.imwrite(write_name, cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))

    # Perform camera calibration with the gathered object/image points.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, img_size, None, None)

    # Persist calibration parameters in a pickle file for later use.
    dist_pickle = {'mtx': mtx, 'dist': dist}
    with open('data/camera_cal/calibration_pickle.p', 'wb') as pickle_file:
        pickle.dump(dist_pickle, pickle_file)
#Camera calibration calling function using uncalibrated image
def camera_cal_calling(filepath):
    """Run camera_calibration on every image matching *filepath*.

    Parameters
    ----------
    filepath : str
        Glob pattern (or single path) selecting calibration images.

    Fixes vs. the original: each globbed file is actually read
    (previously the raw glob pattern was re-read every iteration), and
    the stray writes/prints of the undefined `undist`/`distorted`
    variables (copied from undistort_calling) were removed — this
    function only calibrates; undistortion output is handled separately
    by undistort_calling.
    """
    for fname in glob.glob(filepath):
        uncal_img = mpimg.imread(fname)  # RGB image of the chessboard
        camera_calibration(uncal_img)
# Calibrate using one sample chessboard image from the calibration set
camera_cal_calling('data/camera_cal/calibration03.jpg')
# Print one result to verify that calibration on the image with found corners was successful
uncal_img = mpimg.imread('data/camera_cal/calibration03.jpg')
cal_img = mpimg.imread('data/output_images/corners_found.jpg')
print_images(uncal_img, 'Uncalibrated Image', cal_img, 'Calibrated Image')
Now that we have calibrated our camera for radial and tangential distortion we can reuse those coefficients to undistort new images or frames of video.
Next we need to weave together a combination of methods (i.e., color transforms, gradients..etc.) to create a binary image containing the likelihood of the location of lane pixels and project that image back on our original image. This image will just be a visual verification that these pixels are identified as part of the lane lines.
We will proceed to build out the advanced lane-line detection pipeline. To see the progress at each step, we will use a test image and analyze the results along the way.
Step 2: To test the distortion matrix we found in our camera calibration step, we will create an undistort function.
Here we will pass a distorted image with radial and tangential distortions. We will use our distortion parameters to undistort the image.
#Function to undistort an image with camera distortion coefficients already calculated
def cal_undistort(dist_image):
    """Undistort an image using previously saved calibration coefficients.

    Loads the camera matrix and distortion coefficients pickled by
    camera_calibration and applies cv2.undistort.

    Parameters
    ----------
    dist_image : ndarray
        Image suffering from radial/tangential distortion.

    Returns
    -------
    ndarray
        The undistorted image; the input is not modified.

    Fixes vs. the original: removed the unused grayscale conversion and
    the dead commented-out calibrateCamera call, and the pickle file is
    now closed deterministically via a context manager.
    """
    # Get distortion coefficients from our pickle file
    with open('data/camera_cal/calibration_pickle.p', 'rb') as pickle_file:
        dist_pickle = pickle.load(pickle_file)
    mtx = dist_pickle['mtx']
    dist = dist_pickle['dist']
    # Apply the undistortion, keeping the same camera matrix
    return cv2.undistort(dist_image, mtx, dist, None, mtx)
#Undistort calling function for a distorted image using coefficients found from calibration images:
def undistort_calling(filepath):
    """Undistort every image matching *filepath*.

    Each result is written to data/output_images/undistort_<n>.jpg and
    shown side by side with its distorted original.

    Parameters
    ----------
    filepath : str
        Glob pattern (or single path) selecting distorted images.
    """
    for idx, fname in enumerate(glob.glob(filepath)):
        distorted = cv2.imread(fname)  # BGR image straight from disk
        head, tail = os.path.split(fname)
        # Correct the distortion with the saved calibration parameters
        undist = cal_undistort(distorted)
        output_path = 'data/output_images/undistort_' + str(idx + 1) + '.jpg'
        cv2.imwrite(output_path, undist)  # persist the corrected frame
        # Visual before/after comparison, labelled with the source file name
        print_images(distorted, 'Distorted Image ' + tail, undist, 'Undistorted Image')
We will then use the distorted images in the project test folder as a demonstration that our calibration is correct.
# Demonstrate the calibration on the project's distorted test images:
# first a single road scene, then every numbered test image.
for pattern in ('data/images/road_under_signs_vehicles.png',
                'data/road_test_images/test*.jpg'):
    undistort_calling(pattern)